PyTorch Imports


In [7]:
import sys
import torch
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from torchvision import transforms
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable

from sklearn import cross_validation
from sklearn import metrics
from sklearn.metrics import roc_auc_score, log_loss, roc_curve, auc
from sklearn.cross_validation import StratifiedKFold, ShuffleSplit, cross_val_score, train_test_split

print('__Python VERSION:', sys.version)
print('__pyTorch VERSION:', torch.__version__)
print('__CUDA VERSION')
from subprocess import call
# call(["nvcc", "--version"]) does not work
! nvcc --version
print('__CUDNN VERSION:', torch.backends.cudnn.version())
print('__Number CUDA Devices:', torch.cuda.device_count())
print('__Devices')
# call(["nvidia-smi", "--format=csv", "--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free"])
print('Active CUDA Device: GPU', torch.cuda.current_device())

print ('Available devices ', torch.cuda.device_count())
print ('Current cuda device ', torch.cuda.current_device())

import numpy as np

use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
Tensor = FloatTensor

import pandas as pd

import logging
logging.basicConfig(level=logging.INFO)
lgr = logging.getLogger(__name__)
%matplotlib inline

# !pip install psutil
import psutil
import os
def cpuStats():
        print(sys.version)
        print(psutil.cpu_percent())
        print(psutil.virtual_memory())  # physical memory usage
        pid = os.getpid()
        py = psutil.Process(pid)
        memoryUse = py.memory_info()[0] / 2. ** 30  # memory use in GB...I think
        print('memory GB:', memoryUse)

cpuStats()


__Python VERSION: 2.7.12 (default, Nov 19 2016, 06:48:10) 
[GCC 5.4.0 20160609]
__pyTorch VERSION: 0.2.0_2
__CUDA VERSION
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2016 NVIDIA Corporation
Built on Tue_Jan_10_13:22:03_CST_2017
Cuda compilation tools, release 8.0, V8.0.61
__CUDNN VERSION: 6021
__Number CUDA Devices: 1
__Devices
Active CUDA Device: GPU 0
Available devices  1
Current cuda device  0
2.7.12 (default, Nov 19 2016, 06:48:10) 
[GCC 5.4.0 20160609]
27.8
svmem(total=12596776960, available=4941406208, percent=60.8, used=7322054656, free=2909753344, active=7577300992, inactive=1334910976, buffers=68300800, cached=2296668160, shared=26451968)
memory GB: 1.17856216431
/usr/local/lib/python2.7/dist-packages/sklearn/cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
  "This module will be removed in 0.20.", DeprecationWarning)

No GPU? The nvidia-smi call failed with the OSError below (the binary could not be found), which is why it is commented out in the cell above:

---------------------------------------------------------------------------
OSError                                   Traceback (most recent call last)
<ipython-input-3-64c0769366fe> in <module>()
     36 print('__Number CUDA Devices:', torch.cuda.device_count())
     37 print('__Devices')
---> 38 call(["nvidia-smi", "--format=csv", "--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free"])
     39 print('Active CUDA Device: GPU', torch.cuda.current_device())
     40

In [8]:
# Torch CPU
# !pip install http://download.pytorch.org/whl/cu75/torch-0.2.0.post1-cp27-cp27mu-manylinux1_x86_64.whl
# !pip install torchvision

CUDA Trick

  • Alias the tensor constructors once so the rest of the code is device-agnostic: the same lines run on the GPU (torch.cuda.*) or the CPU (torch.*) depending on torch.cuda.is_available().


In [9]:
use_cuda = torch.cuda.is_available()
# use_cuda = False

FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
Tensor = FloatTensor

Setting up global variables

  • Root folder
  • Image folder
  • Image label CSV file

In [10]:
DATA_ROOT ='/root/data/amazon/'
IMG_PATH = DATA_ROOT + '/train-jpg/'
IMG_EXT = '.jpg'
IMG_DATA_LABELS = DATA_ROOT + '/train_v2.csv'

The Torch Dataset Class

  • A custom dataset subclasses torch.utils.data.Dataset and implements two methods: __getitem__ (load and transform one sample) and __len__ (number of samples). The DataLoader below relies on exactly these two methods.


In [11]:
import time

import matplotlib.pyplot as plt
from sklearn.preprocessing import MultiLabelBinarizer  # one-hot encodes the tag strings

try:
    from PIL import Image
except ImportError:
    import Image

class GenericImageDataset(Dataset):    

    def __init__(self, csv_path, img_path, img_ext, transform=None):
        
        t = time.time()        
        lgr.info('CSV path {}'.format(csv_path))
        lgr.info('IMG path {}'.format(img_path))        
        
        assert img_ext in ['.jpg']
        
        tmp_df = pd.read_csv(csv_path, header=None)
                        
        self.mlb = MultiLabelBinarizer()
        self.img_path = img_path
        self.img_ext = img_ext
        self.transform = transform

        self.X_train = tmp_df[0]        
#         self.X_train = self.X_train.ix[1:]
        
        self.y_train = self.mlb.fit_transform(tmp_df[1].str.split()).astype(np.float32)         
                
#         lgr.info("DF:\n" + str (self.X_train))
#         lgr.info ("self.y_train:\n" + str(self.y_train))

        lgr.info('[*]Dataset loading time {}'.format(time.time() - t))
        lgr.info('[*] Data size is {}'.format(len(self)))

    def __getitem__(self, index):
#         lgr.info ("__getitem__:" + str(index))
        path=self.img_path + self.X_train[index] + self.img_ext
#         lgr.info (" --- get item path:" + path)
        img = Image.open(path)
        img = img.convert('RGB')
        if self.transform is not None:
            # without the transform, the DataLoader would raise:
            # TypeError: batch must contain tensors, numbers, or lists; found <class 'PIL.Image.Image'>
            img = self.transform(img)
#             print (str (type(img))) # <class 'torch.FloatTensor'>                
        label = torch.from_numpy(self.y_train[index])
        return img, label

    def __len__(self):
        return len(self.X_train.index)

    @staticmethod        
    def imshow(img):
        img = img / 2 + 0.5     # unnormalize
        npimg = img.numpy()
        plt.imshow(np.transpose(npimg, (1, 2, 0)))

    @staticmethod    
    def floatTensorToImage(img, mean=0, std=1):
        """convert a tensor to an image"""
        img = np.transpose(img.numpy(), (1, 2, 0))
        img = (img*std+ mean)*255
        img = img.astype(np.uint8)    
        return img    
    
    @staticmethod
    def toTensor(img):
        """convert a numpy array of shape HWC to CHW tensor"""
        img = img.transpose((2, 0, 1)).astype(np.float32)
        tensor = torch.from_numpy(img).float()
        return tensor/255.0

    @staticmethod
    def cpuStats():
        print(sys.version)
        print(psutil.cpu_percent())
        print(psutil.virtual_memory())  # physical memory usage
        pid = os.getpid()
        py = psutil.Process(pid)
        memoryUse = py.memory_info()[0] / 2. ** 30  # memory use in GB...I think
        print('memory GB:', memoryUse)

The Torch transforms.ToTensor() method

  • Converts a PIL.Image or numpy.ndarray (H x W x C) in the range [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] (see the quick check below).
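
A quick sanity check of that contract, using a hypothetical random dummy image (the 32x32 size is only illustrative):

In [ ]:
import numpy as np
from PIL import Image
from torchvision import transforms

# dummy RGB image in H x W x C layout with values in [0, 255]
dummy = Image.fromarray(np.random.randint(0, 256, (32, 32, 3)).astype(np.uint8))
t = transforms.ToTensor()(dummy)
print(t.size())          # torch.Size([3, 32, 32]) -> C x H x W
print(t.min(), t.max())  # values scaled into [0.0, 1.0]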

In [12]:
# transformations = transforms.Compose([transforms.ToTensor()])
transformations = transforms.Compose([transforms.Scale(32),transforms.ToTensor()])

The Torch DataLoader Class

  • Wraps our GenericImageDataset.
  • Can be regarded as a list (technically, an iterable).
  • Each iteration yields a minibatch of (img, label) pairs (see the peek below).
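
Once dset_train and train_loader are built (in the Train Validation Split section below), a single minibatch can be inspected like this (a sketch; the printed sizes assume batch_size=64, Scale(32) inputs and the 17 Planet labels):

In [ ]:
# Peek at one minibatch from the DataLoader defined further below.
images, labels = next(iter(train_loader))
print(images.size())  # e.g. torch.Size([64, 3, 32, 32])
print(labels.size())  # e.g. torch.Size([64, 17]) -- one binary indicator per label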

In [13]:
dset_train = GenericImageDataset(IMG_DATA_LABELS,
                                 IMG_PATH,
                                 IMG_EXT,transformations)

# train_loader = DataLoader(dset_train,
#                           batch_size=64,
#                           shuffle=False,
#                           num_workers=1 # 1 for CUDA
#                          # pin_memory=True # CUDA only
#                          )
# import torchvision


INFO:__main__:CSV path /root/data/amazon//train_v2.csv
INFO:__main__:IMG path /root/data/amazon//train-jpg/

IOErrorTraceback (most recent call last)
<ipython-input-13-b374077abc88> in <module>()
      1 dset_train = GenericImageDataset(IMG_DATA_LABELS,
      2                                  IMG_PATH,
----> 3                                  IMG_EXT,transformations)
      4 
      5 # train_loader = DataLoader(dset_train,

<ipython-input-11-24c81a072b04> in __init__(self, csv_path, img_path, img_ext, transform)
     14         assert img_ext in ['.jpg']
     15 
---> 16         tmp_df = pd.read_csv(csv_path, header=None)
     17 
     18         self.mlb = MultiLabelBinarizer()

/usr/local/lib/python2.7/dist-packages/pandas/io/parsers.pyc in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, escapechar, comment, encoding, dialect, tupleize_cols, error_bad_lines, warn_bad_lines, skipfooter, skip_footer, doublequote, delim_whitespace, as_recarray, compact_ints, use_unsigned, low_memory, buffer_lines, memory_map, float_precision)
    703                     skip_blank_lines=skip_blank_lines)
    704 
--> 705         return _read(filepath_or_buffer, kwds)
    706 
    707     parser_f.__name__ = name

/usr/local/lib/python2.7/dist-packages/pandas/io/parsers.pyc in _read(filepath_or_buffer, kwds)
    443 
    444     # Create the parser.
--> 445     parser = TextFileReader(filepath_or_buffer, **kwds)
    446 
    447     if chunksize or iterator:

/usr/local/lib/python2.7/dist-packages/pandas/io/parsers.pyc in __init__(self, f, engine, **kwds)
    812             self.options['has_index_names'] = kwds['has_index_names']
    813 
--> 814         self._make_engine(self.engine)
    815 
    816     def close(self):

/usr/local/lib/python2.7/dist-packages/pandas/io/parsers.pyc in _make_engine(self, engine)
   1043     def _make_engine(self, engine='c'):
   1044         if engine == 'c':
-> 1045             self._engine = CParserWrapper(self.f, **self.options)
   1046         else:
   1047             if engine == 'python':

/usr/local/lib/python2.7/dist-packages/pandas/io/parsers.pyc in __init__(self, src, **kwds)
   1682         kwds['allow_leading_cols'] = self.index_col is not False
   1683 
-> 1684         self._reader = parsers.TextReader(src, **kwds)
   1685 
   1686         # XXX

pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader.__cinit__()

pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._setup_parser_source()

IOError: File /root/data/amazon//train_v2.csv does not exist

Train Validation Split

  • Since there is no built-in train_test_split helper in this PyTorch version, we split the training dataset into training and validation subsets ourselves (see the random_split note after the code below).

In [26]:
class FullTrainingDataset(torch.utils.data.Dataset):
    """A view over the slice [offset, offset + length) of a parent Dataset."""
    def __init__(self, full_ds, offset, length):
        self.full_ds = full_ds
        self.offset = offset
        self.length = length
        assert len(full_ds) >= offset + length, "Parent Dataset not long enough"
        super(FullTrainingDataset, self).__init__()

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return self.full_ds[i + self.offset]

validationRatio = 0.22

def trainTestSplit(dataset, val_share=validationRatio):
    val_offset = int(len(dataset) * (1 - val_share))
    return (FullTrainingDataset(dataset, 0, val_offset),
            FullTrainingDataset(dataset, val_offset, len(dataset) - val_offset))

 
train_ds, val_ds = trainTestSplit(dset_train)

train_loader = torch.utils.data.DataLoader(train_ds, batch_size=64, shuffle=False, num_workers=1)
val_loader = torch.utils.data.DataLoader(val_ds, batch_size=64, shuffle=False, num_workers=1)
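
Note: newer PyTorch releases ship torch.utils.data.random_split, which can replace the manual wrapper above. A minimal sketch, not runnable on the 0.2.0 build used in this notebook:

In [ ]:
# Sketch only: torch.utils.data.random_split requires a newer PyTorch than the 0.2.0 used here.
from torch.utils.data import random_split

val_len = int(len(dset_train) * validationRatio)
train_len = len(dset_train) - val_len
train_ds_alt, val_ds_alt = random_split(dset_train, [train_len, val_len])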

Test the DataLoader Class


In [27]:
imagesToShow=4

for i, data in enumerate(train_loader, 0):
    lgr.info('i=%d: '%(i))            
    images, labels = data            
    num = len(images)
    
    ax = plt.subplot(1, imagesToShow, i + 1)
    plt.tight_layout()
    ax.set_title('Sample #{}'.format(i))
    ax.axis('off')
    
    for n in range(num):
        image=images[n]
        label=labels[n]
        plt.imshow(GenericImageDataset.floatTensorToImage(image))
        
    if i==imagesToShow-1:
        break


INFO:__main__:i=0: 
INFO:__main__:i=1: 
INFO:__main__:i=2: 
INFO:__main__:i=3: 

In [28]:
for i, data in enumerate(val_loader, 0):
    lgr.info('i=%d: '%(i))            
    images, labels = data            
    num = len(images)
    
    ax = plt.subplot(1, imagesToShow, i + 1)
    plt.tight_layout()
    ax.set_title('Sample #{}'.format(i))
    ax.axis('off')
    
    for n in range(num):
        image=images[n]
        label=labels[n]
        plt.imshow(GenericImageDataset.floatTensorToImage(image))
        
    if i==imagesToShow-1:
        break


INFO:__main__:i=0: 
INFO:__main__:i=1: 
INFO:__main__:i=2: 
INFO:__main__:i=3: 

The NN model

  • We will use a simple CNN: two conv(3x3) -> relu -> max-pool(2x2) blocks (the second with 2D dropout), followed by two fully connected layers and a sigmoid over the 17 labels.

  • In PyTorch, a model is defined by a subclass of nn.Module. It has two methods:

  • __init__: constructor. Create layers here. Note that we don't define the connections between layers in this function.

  • forward(x): the forward function. Receives an input variable x and returns an output variable. Note that the layers are actually connected here, dynamically.

In [29]:
# https://www.kaggle.com/mratsim/starting-kit-for-pytorch-deep-learning/code/notebook
class Net(nn.Module):
    def __init__(self, initKernel='uniform'):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(2304, 256)
        self.fc2 = nn.Linear(256, 17)
        
        # first-conv weight init: Xavier (uniform) or Kaiming (normal)
        if initKernel == 'uniform':
            nn.init.xavier_uniform(self.conv1.weight, gain=np.sqrt(2.0))
        else:
            nn.init.kaiming_normal(self.conv1.weight)                    

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(x.size(0), -1) # Flatten layer
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
#         return x
        return F.sigmoid(x)
#         return F.log_softmax(x)
    
    
if use_cuda:
    lgr.info ("Using the GPU")
    model = Net().cuda() # On GPU
else:
    lgr.info ("Using the CPU")
    model = Net() # On CPU

lgr.info('Model {}'.format(model))


INFO:__main__:Using the GPU
INFO:__main__:Model Net (
  (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1))
  (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))
  (conv2_drop): Dropout2d (p=0.5)
  (fc1): Linear (2304 -> 256)
  (fc2): Linear (256 -> 17)
)
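
The fc1 input size of 2304 follows from the 32x32 inputs produced by transforms.Scale(32). A quick shape check with a dummy batch (a sketch; the zero tensor is only for tracing shapes):

In [ ]:
# Trace the conv stack with a dummy 32x32 RGB batch to confirm the flatten size feeding fc1.
dummy = Variable(torch.zeros(1, 3, 32, 32))
if use_cuda:
    dummy = dummy.cuda()
out = F.relu(F.max_pool2d(model.conv1(dummy), 2))                  # 1 x 32 x 15 x 15
out = F.relu(F.max_pool2d(model.conv2_drop(model.conv2(out)), 2))  # 1 x 64 x 6 x 6
print(out.view(out.size(0), -1).size())                            # torch.Size([1, 2304]) = 64 * 6 * 6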

Loss and Optimizer

  • Select a loss function and the optimization algorithm.

In [30]:
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
# optimizer = optim.SGD(net.parameters(), lr=1e-1,momentum=0.9, weight_decay=1e-4)
lgr.info('Optimizer {}'.format(optimizer))

# nn.ClassNLLCriterion is the Lua Torch name; the PyTorch equivalent is nn.NLLLoss. Here we use
# binary cross-entropy instead, since this is a multi-label problem (see the note below).


INFO:__main__:Optimizer <torch.optim.sgd.SGD object at 0x7fae7838d9d0>
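
The training loop below calls F.binary_cross_entropy directly on the sigmoid outputs; an equivalent module-based criterion (a sketch, not used in the timed run below) would be:

In [ ]:
# Module form of the same multi-label loss (sigmoid outputs + binary cross-entropy).
criterion = nn.BCELoss()
# loss = criterion(preds, target)  # same value as F.binary_cross_entropy(preds, target)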

In [31]:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import time
 

    
clf=model 
opt= optimizer
loss_history = []
acc_history = []
 
def train(epoch):
    clf.train() # set model in training mode (need this because of dropout)
     
    # dataset API gives us pythonic batching 
    for batch_idx, (data, target) in enumerate(train_loader):
        
        if use_cuda:
            data, target = Variable(data.cuda(async=True)), Variable(target.cuda(async=True)) # On GPU                
        else:            
            data, target = Variable(data), Variable(target) # RuntimeError: expected CPU tensor (got CUDA tensor)                           
                 
        # forward pass, calculate loss and backprop!
        opt.zero_grad()
        preds = clf(data)
        # preds and target already live on the correct device, so the loss needs no extra .cuda()
        loss = F.binary_cross_entropy(preds, target)
#         loss = F.log_softmax(preds) # TypeError: log_softmax() takes exactly 1 argument (2 given)
#         loss = F.nll_loss(preds, target.long()) # RuntimeError: multi-target not supported -- NLL expects class indices, not multi-label 0/1 vectors
        loss.backward()
        
        opt.step()
        
        
        if batch_idx % 100 == 0:
            loss_history.append(loss.data[0])
            lgr.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
            epoch, batch_idx * len(data), len(train_loader.dataset),
            100. * batch_idx / len(train_loader), loss.data[0]))              

            
start_time = time.time()    

for epoch in range(1, 2):
    print("Epoch %d" % epoch)
    train(epoch)    
end_time = time.time()
print('{} {:6.3f} seconds'.format('GPU:' if use_cuda else 'CPU:', end_time - start_time))
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(loss_history)
plt.show()


Epoch 1
INFO:__main__:Train Epoch: 1 [0/31573 (0%)]	Loss: 0.695175
INFO:__main__:Train Epoch: 1 [6400/31573 (20%)]	Loss: 0.488698
INFO:__main__:Train Epoch: 1 [12800/31573 (40%)]	Loss: 0.256287
INFO:__main__:Train Epoch: 1 [19200/31573 (61%)]	Loss: 0.277272
INFO:__main__:Train Epoch: 1 [25600/31573 (81%)]	Loss: 0.298735
GPU: 37.892 seconds

In [18]:
# def test(epoch):
#     clf.eval() # set model in inference mode (need this because of dropout)
#     test_loss = 0
#     correct = 0
     
#     for data, target in val_loader:
        
#         if use_cuda:
#             data, target = Variable(data.cuda(async=True)), Variable(target.cuda(async=True)) # On GPU                
#         else:            
#             data, target = Variable(data), Variable(target) # RuntimeError: expected CPU tensor (got CUDA tensor)               
         
#         output = clf(data)
#         test_loss += F.binary_cross_entropy(output, target).data[0]
#         pred = output.data.max(1)[1] # get the index of the max log-probability
#         print ("Shape of pred:" + str(pred.shape))
#         target_data_long=target.data.long()
#         print ("Shape of target_data_long:" + str(target_data_long.shape))
#         correct += pred.eq(target_data_long).cpu().sum()
 
#     test_loss /= len(val_loader) # loss function already averages over batch size
#     accuracy = 100. * correct / len(val_loader.dataset)
#     acc_history.append(accuracy)
#     print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
#         test_loss, correct, len(val_loader.dataset),
#         accuracy))

    
# for epoch in range(1, 3):
#     print("Epoch %d" % epoch)
#     test(epoch)
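
The commented-out routine above scores predictions with an argmax, which fits single-label classification but not the 17-way multi-label targets used here. A sketch of a multi-label validation pass, thresholding the sigmoid outputs at 0.5 (element-wise accuracy is an assumption, not the competition metric):

In [ ]:
# Multi-label validation sketch: average BCE loss plus element-wise accuracy at a 0.5 threshold.
def validate():
    clf.eval()  # inference mode (disables dropout)
    val_loss, correct, total = 0.0, 0, 0
    for data, target in val_loader:
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target, volatile=True)
        output = clf(data)
        val_loss += F.binary_cross_entropy(output, target).data[0]
        pred = (output.data > 0.5).float()      # per-label decision
        correct += pred.eq(target.data).sum()
        total += target.data.numel()
    val_loss /= len(val_loader)
    print('Validation loss: {:.4f}, element-wise accuracy: {:.2f}%'.format(
        val_loss, 100.0 * correct / total))

# validate()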

In [214]:
%%bash
jupyter nbconvert \
    --to=slides \
    --reveal-prefix=https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.2.0/ \
    --output=py09.html \
    './09 PyTorch Kaggle Image Data-set loading with CNN'


/usr/local/lib/python2.7/dist-packages/requests/__init__.py:80: RequestsDependencyWarning: urllib3 (1.22) or chardet (2.3.0) doesn't match a supported version!
  RequestsDependencyWarning)
[NbConvertApp] Converting notebook ./09 PyTorch Kaggle Image Data-set loading with CNN.ipynb to slides
[NbConvertApp] Writing 397955 bytes to ./py09.html.slides.html

In [ ]: